#ifndef CLONE_DOMAIN0
if ( d != dom0 )
BUG();
- if ( test_bit(DF_CONSTRUCTED, &d->flags) )
+ if ( test_bit(_DOMF_constructed, &d->domain_flags) )
BUG();
#endif
#endif
console_endboot(strstr(cmdline, "tty0") != NULL);
- set_bit(DF_CONSTRUCTED, &d->flags);
+ set_bit(_DOMF_constructed, &d->domain_flags);
new_thread(ed, pkern_entry, 0, 0);
// FIXME: Hack for keyboard input
unsigned long pkern_entry;
#ifndef DOMU_AUTO_RESTART
- if ( test_bit(DF_CONSTRUCTED, &d->flags) ) BUG();
+ if ( test_bit(_DOMF_constructed, &d->domain_flags) ) BUG();
#endif
- printk("*** LOADING DOMAIN %d ***\n",d->id);
+ printk("*** LOADING DOMAIN %d ***\n",d->domain_id);
d->max_pages = dom0_size/PAGE_SIZE; // FIXME: use dom0 size
// FIXME: use domain0 command line
d->arch.mm = xmalloc(struct mm_struct);
if (unlikely(!d->arch.mm)) {
- printk("Can't allocate mm_struct for domain %d\n",d->id);
+ printk("Can't allocate mm_struct for domain %d\n",d->domain_id);
return -ENOMEM;
}
memset(d->arch.mm, 0, sizeof(*d->arch.mm));
d->arch.mm->pgd = pgd_alloc(d->arch.mm);
if (unlikely(!d->arch.mm->pgd)) {
- printk("Can't allocate pgd for domain %d\n",d->id);
+ printk("Can't allocate pgd for domain %d\n",d->domain_id);
return -ENOMEM;
}
loaddomainelfimage(d,image_start);
printk("loaddomainelfimage returns\n");
- set_bit(DF_CONSTRUCTED, &d->flags);
+ set_bit(_DOMF_constructed, &d->domain_flags);
printk("calling new_thread, entry=%p\n",pkern_entry);
#ifdef DOMU_AUTO_RESTART
{
/* re-copy the OS image to reset data values to original */
printk("reconstruct_domU: restarting domain %d...\n",
- ed->domain->id);
+ ed->domain->domain_id);
loaddomainelfimage(ed->domain,ed->domain->arch.image_start);
new_thread(ed, ed->domain->arch.entry, 0, 0);
}
else next++;
if (construct_domU(d, (unsigned long)domU_staging_area, size,0,0,0)) {
printf("launch_domainU: couldn't construct(id=%d,%lx,%lx)\n",
- d->id,domU_staging_area,size);
+ d->domain_id,domU_staging_area,size);
return 2;
}
domain_unpause_by_systemcontroller(d);
#define IDLE0_DOMAIN(_t) \
{ \
- id: IDLE_DOMAIN_ID, \
- flags: 1<<DF_IDLETASK, \
+ domain_id: IDLE_DOMAIN_ID, \
+ domain_flags:DOMF_idle_domain, \
refcnt: ATOMIC_INIT(1) \
}
+#define INIT_TASK(tsk) \
+{ \
+ /*processor: 0,*/ \
-+ /*id: IDLE_DOMAIN_ID,*/ \
-+ /*flags: 1<<DF_IDLETASK,*/ \
++ /*domain_id: IDLE_DOMAIN_ID,*/ \
++ /*domain_flags: DOMF_idle_domain,*/ \
+ refcnt: ATOMIC_INIT(1) \
+}
+#else
if (vcpu_deliverable_interrupts(ed)) {
unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
if (vcpu_timer_pending_early(ed))
-printf("*#*#*#* about to deliver early timer to domain %d!!!\n",ed->domain->id);
+printf("*#*#*#* about to deliver early timer to domain %d!!!\n",ed->domain->domain_id);
reflect_interruption(0,isr,0,regs,IA64_EXTINT_VECTOR);
}
}
{
//printk("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
//printk("@@@@@@ context switch from domain %d (%x) to domain %d (%x)\n",
-//prev->domain->id,(long)prev&0xffffff,next->domain->id,(long)next&0xffffff);
-//if (prev->domain->id == 1 && next->domain->id == 0) cs10foo();
-//if (prev->domain->id == 0 && next->domain->id == 1) cs01foo();
-//printk("@@sw %d->%d\n",prev->domain->id,next->domain->id);
+//prev->domain->domain_id,(long)prev&0xffffff,next->domain->domain_id,(long)next&0xffffff);
+//if (prev->domain->domain_id == 1 && next->domain->domain_id == 0) cs10foo();
+//if (prev->domain->domain_id == 0 && next->domain->domain_id == 1) cs01foo();
+//printk("@@sw %d->%d\n",prev->domain->domain_id,next->domain->domain_id);
switch_to(prev,next,prev);
// leave this debug for now: it acts as a heartbeat when more than
// one domain is active
{
static long cnt[16] = { 50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50};
static int i = 100;
-int id = ((struct exec_domain *)current)->domain->id & 0xf;
+int id = ((struct exec_domain *)current)->domain->domain_id & 0xf;
if (!cnt[id]--) { printk("%x",id); cnt[id] = 50; }
if (!i--) { printk("+",id); cnt[id] = 100; }
}
- clear_bit(EDF_RUNNING, &prev->flags);
+ clear_bit(_VCPUF_running, &prev->vcpu_flags);
//if (!is_idle_task(next->domain) )
//send_guest_virq(next, VIRQ_TIMER);
load_region_regs(current);
loop:
printf("$$$$$ PANIC in domain %d (k6=%p): ",
- ed->domain->id, ia64_get_kr(IA64_KR_CURRENT));
+ ed->domain->domain_id, ia64_get_kr(IA64_KR_CURRENT));
va_start(args, fmt);
(void)vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
printf(buf);
if (regs) show_registers(regs);
domain_pause_by_systemcontroller(current->domain);
- set_bit(DF_CRASHED, ed->domain->flags);
- if (ed->domain->id == 0) {
+ set_bit(_DOMF_crashed, ed->domain->domain_flags);
+ if (ed->domain->domain_id == 0) {
int i = 1000000000L;
// if domain0 crashes, just periodically print out panic
// message to make post-mortem easier
if ( dom0 == NULL )
panic("Error creating domain 0\n");
- set_bit(DF_PRIVILEGED, &dom0->flags);
+ set_bit(_DOMF_privileged, &dom0->domain_flags);
/*
* We're going to setup domain0 using the module(s) that we stashed safely
{
APRINTK("Audit %d: type count went below zero "
"mfn=%lx t=%x ot=%x",
- d->id, page_to_pfn(page),
+ d->domain_id, page_to_pfn(page),
page->u.inuse.type_info,
page->tlbflush_timestamp);
errors++;
{
APRINTK("Audit %d: type count overflowed "
"mfn=%lx t=%x ot=%x",
- d->id, page_to_pfn(page),
+ d->domain_id, page_to_pfn(page),
page->u.inuse.type_info,
page->tlbflush_timestamp);
errors++;
{
APRINTK("Audit %d: general count went below zero "
"mfn=%lx t=%x ot=%x",
- d->id, page_to_pfn(page),
+ d->domain_id, page_to_pfn(page),
page->u.inuse.type_info,
page->tlbflush_timestamp);
errors++;
{
APRINTK("Audit %d: general count overflowed "
"mfn=%lx t=%x ot=%x",
- d->id, page_to_pfn(page),
+ d->domain_id, page_to_pfn(page),
page->u.inuse.type_info,
page->tlbflush_timestamp);
errors++;
"belonging to a domain %p (id=%d)\n",
l1mfn,
page_get_owner(l1page),
- page_get_owner(l1page)->id);
+ page_get_owner(l1page)->domain_id);
errors++;
continue;
}
{
printk("Audit %d: [Shadow L2 mfn=%lx i=%x] "
"Expected Shadow L1 t=%x mfn=%lx\n",
- d->id, mfn, i,
+ d->domain_id, mfn, i,
l1page->u.inuse.type_info, l1mfn);
errors++;
}
l1mfn,
page_get_owner(l1page),
(page_get_owner(l1page)
- ? page_get_owner(l1page)->id
+ ? page_get_owner(l1page)->domain_id
: -1));
errors++;
continue;
{
printk("Audit %d: [%x] Found %s Linear PT "
"t=%x mfn=%lx\n",
- d->id, i, (l1mfn==mfn) ? "Self" : "Other",
+ d->domain_id, i, (l1mfn==mfn) ? "Self" : "Other",
l1page->u.inuse.type_info, l1mfn);
}
else if ( page_type != PGT_l1_page_table )
{
printk("Audit %d: [L2 mfn=%lx i=%x] "
"Expected L1 t=%x mfn=%lx\n",
- d->id, mfn, i,
+ d->domain_id, mfn, i,
l1page->u.inuse.type_info, l1mfn);
errors++;
}
{
printk("Audit %d: [hl2mfn=%lx,i=%x] Skip foreign page "
"dom=%p (id=%d) mfn=%lx c=%08x t=%08x\n",
- d->id, hl2mfn, i,
+ d->domain_id, hl2mfn, i,
page_get_owner(gpage),
- page_get_owner(gpage)->id,
+ page_get_owner(gpage)->domain_id,
gmfn,
gpage->count_info,
gpage->u.inuse.type_info);
{
printk("Audit %d: [l1mfn=%lx, i=%x] Illegal RW "
"t=%x mfn=%lx\n",
- d->id, l1mfn, i,
+ d->domain_id, l1mfn, i,
gpage->u.inuse.type_info, gmfn);
errors++;
}
{
printk("Audit %d: [l1mfn=%lx, i=%x] Illegal RW of "
"page table gmfn=%lx\n",
- d->id, l1mfn, i, gmfn);
+ d->domain_id, l1mfn, i, gmfn);
errors++;
}
}
{
printk("Audit %d: [l1mfn=%lx,i=%x] Skip foreign page "
"dom=%p (id=%d) mfn=%lx c=%08x t=%08x\n",
- d->id, l1mfn, i,
+ d->domain_id, l1mfn, i,
page_get_owner(gpage),
- page_get_owner(gpage)->id,
+ page_get_owner(gpage)->domain_id,
gmfn,
gpage->count_info,
gpage->u.inuse.type_info);
{
printk("Audit %d: found an L2 guest page "
"mfn=%lx t=%08x c=%08x while in shadow mode\n",
- d->id, mfn, page->u.inuse.type_info,
+ d->domain_id, mfn, page->u.inuse.type_info,
page->count_info);
errors++;
}
PGT_validated )
{
printk("Audit %d: L2 mfn=%lx not validated %08x\n",
- d->id, mfn, page->u.inuse.type_info);
+ d->domain_id, mfn, page->u.inuse.type_info);
errors++;
}
if ( (page->u.inuse.type_info & PGT_pinned) != PGT_pinned )
{
printk("Audit %d: L2 mfn=%lx not pinned t=%08x\n",
- d->id, mfn, page->u.inuse.type_info);
+ d->domain_id, mfn, page->u.inuse.type_info);
errors++;
}
}
PGT_validated )
{
printk("Audit %d: L1 not validated mfn=%lx t=%08x\n",
- d->id, mfn, page->u.inuse.type_info);
+ d->domain_id, mfn, page->u.inuse.type_info);
errors++;
}
if ( !VM_ASSIST(d, VMASST_TYPE_writable_pagetables) )
{
printk("Audit %d: L1 mfn=%lx not pinned t=%08x\n",
- d->id, mfn, page->u.inuse.type_info);
+ d->domain_id, mfn, page->u.inuse.type_info);
}
}
}
if ( xenpages != d->xenheap_pages ||
totpages != d->tot_pages )
{
- printk("ARGH! dom %d: xen=%d %d, pages=%d %d\n", d->id,
+ printk("ARGH! dom %d: xen=%d %d, pages=%d %d\n", d->domain_id,
xenpages, d->xenheap_pages,
totpages, d->tot_pages );
}
if ( (pt[i] & _PAGE_PRESENT) && ((pt[i] >> PAGE_SHIFT) == xmfn) )
printk(" found dom=%d mfn=%lx t=%08x c=%08x "
"pt[i=%x]=%lx\n",
- d->id, mfn, page->u.inuse.type_info,
+ d->domain_id, mfn, page->u.inuse.type_info,
page->count_info, i, pt[i]);
}
{
printk("skipping audit domain of translated domain %d "
"from other context\n",
- d->id);
+ d->domain_id);
return;
}
if ( !(flags & AUDIT_QUIET) &&
((io_mappings > 0) || (lowmem_mappings > 0)) )
printk("Audit %d: Found %d lowmem mappings and %d io mappings\n",
- d->id, lowmem_mappings, io_mappings);
+ d->domain_id, lowmem_mappings, io_mappings);
/* PHASE 2 */
if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
{
printk("Audit %d: type count!=0 t=%x ot=%x c=%x mfn=%lx\n",
- d->id, page->u.inuse.type_info,
+ d->domain_id, page->u.inuse.type_info,
page->tlbflush_timestamp,
page->count_info, mfn);
errors++;
if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
{
printk("Audit %d: type count!=0 t=%x ot=%x c=%x mfn=%lx\n",
- d->id, page->u.inuse.type_info,
+ d->domain_id, page->u.inuse.type_info,
page->tlbflush_timestamp,
page->count_info, mfn);
//errors++;
if ( (page->count_info & PGC_count_mask) != 1 )
{
printk("Audit %d: gen count!=1 (c=%x) t=%x ot=%x mfn=%lx\n",
- d->id,
+ d->domain_id,
page->count_info,
page->u.inuse.type_info,
page->tlbflush_timestamp, mfn );
{
printk("Audit %d: shadow page counts wrong "
"mfn=%lx t=%08x c=%08x\n",
- d->id, page_to_pfn(page),
+ d->domain_id, page_to_pfn(page),
page->u.inuse.type_info,
page->count_info);
printk("a->gpfn_and_flags=%p\n",
if ( !(flags & AUDIT_QUIET) )
printk("Audit dom%d Done. "
"pages=%d oos=%d l1=%d l2=%d ctot=%d ttot=%d\n",
- d->id, page_count, oos_count, l1, l2, ctot, ttot);
+ d->domain_id, page_count, oos_count, l1, l2, ctot, ttot);
if ( !(flags & AUDIT_SHADOW_ALREADY_LOCKED) )
shadow_unlock(d);
#endif
c->flags = 0;
- if ( test_bit(EDF_DONEFPUINIT, &ed->flags) )
+ if ( test_bit(_VCPUF_fpu_initialised, &ed->vcpu_flags) )
c->flags |= VGCF_I387_VALID;
if ( KERNEL_MODE(ed, &ed->arch.guest_context.user_regs) )
c->flags |= VGCF_IN_KERNEL;
struct exec_domain *ed = current;
/* Just some sanity to ensure that the scheduler is set up okay. */
- ASSERT(ed->domain->id == IDLE_DOMAIN_ID);
+ ASSERT(ed->domain->domain_id == IDLE_DOMAIN_ID);
percpu_ctxt[smp_processor_id()].curr_ed = ed;
set_bit(smp_processor_id(), &ed->domain->cpuset);
domain_unpause_by_systemcontroller(ed->domain);
ed->arch.flags = TF_kernel_mode;
- if ( d->id != IDLE_DOMAIN_ID )
+ if ( d->domain_id != IDLE_DOMAIN_ID )
{
ed->arch.schedule_tail = continue_nonidle_task;
d->shared_info = (void *)alloc_xenheap_page();
memset(d->shared_info, 0, PAGE_SIZE);
- ed->vcpu_info = &d->shared_info->vcpu_data[ed->id];
+ ed->vcpu_info = &d->shared_info->vcpu_data[ed->vcpu_id];
ed->cpumap = CPUMAP_RUNANYWHERE;
SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
machine_to_phys_mapping[virt_to_phys(d->shared_info) >>
struct domain *d = ed->domain;
ed->arch.schedule_tail = d->exec_domain[0]->arch.schedule_tail;
ed->arch.perdomain_ptes =
- d->arch.mm_perdomain_pt + (ed->id << PDPT_VCPU_SHIFT);
+ d->arch.mm_perdomain_pt + (ed->vcpu_id << PDPT_VCPU_SHIFT);
ed->arch.flags = TF_kernel_mode;
}
return -EINVAL;
}
- clear_bit(EDF_DONEFPUINIT, &ed->flags);
+ clear_bit(_VCPUF_fpu_initialised, &ed->vcpu_flags);
if ( c->flags & VGCF_I387_VALID )
- set_bit(EDF_DONEFPUINIT, &ed->flags);
+ set_bit(_VCPUF_fpu_initialised, &ed->vcpu_flags);
ed->arch.flags &= ~TF_kernel_mode;
if ( c->flags & VGCF_IN_KERNEL )
ed->arch.guest_context.user_regs.eflags |= EF_IE;
}
- if ( test_bit(EDF_DONEINIT, &ed->flags) )
+ if ( test_bit(_VCPUF_initialised, &ed->vcpu_flags) )
return 0;
if ( (rc = (int)set_fast_trap(ed, c->fast_trap_idx)) != 0 )
for ( i = 0; i < 8; i++ )
(void)set_debugreg(ed, i, c->debugreg[i]);
- if ( ed->id == 0 )
+ if ( ed->vcpu_id == 0 )
d->vm_assist = c->vm_assist;
phys_basetab = c->pt_base;
update_pagetables(ed);
/* Don't redo final setup */
- set_bit(EDF_DONEINIT, &ed->flags);
+ set_bit(_VCPUF_initialised, &ed->vcpu_flags);
return 0;
}
* 'prev' (after this point, a dying domain's info structure may be freed
* without warning).
*/
- clear_bit(EDF_RUNNING, &prev->flags);
+ clear_bit(_VCPUF_running, &prev->vcpu_flags);
schedule_tail(next);
BUG();
extern void translate_l2pgtable(struct domain *d, l1_pgentry_t *p2m, unsigned long l2mfn);
/* Sanity! */
- if ( d->id != 0 )
+ if ( d->domain_id != 0 )
BUG();
- if ( test_bit(DF_CONSTRUCTED, &d->flags) )
+ if ( test_bit(_DOMF_constructed, &d->domain_flags) )
BUG();
memset(&dsi, 0, sizeof(struct domain_setup_info));
/* DOM0 gets access to everything. */
physdev_init_dom0(d);
- set_bit(DF_CONSTRUCTED, &d->flags);
+ set_bit(_DOMF_constructed, &d->domain_flags);
new_thread(ed, dsi.v_kernentry, vstack_end, vstartinfo_start);
__asm__ __volatile__ ( "fninit" );
if ( cpu_has_xmm )
load_mxcsr(0x1f80);
- set_bit(EDF_DONEFPUINIT, ¤t->flags);
+ set_bit(_VCPUF_fpu_initialised, ¤t->vcpu_flags);
}
void save_init_fpu(struct exec_domain *tsk)
* This causes us to set the real flag, so we'll need
* to temporarily clear it while saving f-p state.
*/
- if ( test_bit(EDF_GUEST_STTS, &tsk->flags) )
+ if ( test_bit(_VCPUF_guest_stts, &tsk->vcpu_flags) )
clts();
if ( cpu_has_fxsr )
"fnsave %0 ; fwait"
: "=m" (tsk->arch.guest_context.fpu_ctxt) );
- clear_bit(EDF_USEDFPU, &tsk->flags);
+ clear_bit(_VCPUF_fpu_dirtied, &tsk->vcpu_flags);
stts();
}
#include <asm/desc.h>
struct domain idle0_domain = {
- id: IDLE_DOMAIN_ID,
- flags: 1<<DF_IDLETASK,
+ domain_id: IDLE_DOMAIN_ID,
+ domain_flags:DOMF_idle_domain,
refcnt: ATOMIC_INIT(1)
};
#ifdef VERBOSE
#define MEM_LOG(_f, _a...) \
printk("DOM%u: (file=mm.c, line=%d) " _f "\n", \
- current->domain->id , __LINE__ , ## _a )
+ current->domain->domain_id , __LINE__ , ## _a )
#else
#define MEM_LOG(_f, _a...) ((void)0)
#endif
*/
dom_xen = alloc_domain_struct();
atomic_set(&dom_xen->refcnt, 1);
- dom_xen->id = DOMID_XEN;
+ dom_xen->domain_id = DOMID_XEN;
/*
* Initialise our DOMID_IO domain.
*/
dom_io = alloc_domain_struct();
atomic_set(&dom_io->refcnt, 1);
- dom_io->id = DOMID_IO;
+ dom_io->domain_id = DOMID_IO;
/* First 1MB of RAM is historically marked as I/O. */
for ( i = 0; i < 0x100; i++ )
* See domain.c:relinquish_list().
*/
ASSERT((x & PGT_validated) ||
- test_bit(DF_DYING, &page_get_owner(page)->flags));
+ test_bit(_DOMF_dying, &page_get_owner(page)->domain_flags));
if ( unlikely((nx & PGT_count_mask) == 0) )
{
percpu_info[cpu].foreign = dom_io;
break;
default:
- MEM_LOG("Dom %u cannot set foreign dom\n", d->id);
+ MEM_LOG("Dom %u cannot set foreign dom\n", d->domain_id);
okay = 0;
break;
}
if ( shadow_mode_external(d) )
{
MEM_LOG("ignoring SET_LDT hypercall from external "
- "domain %u\n", d->id);
+ "domain %u\n", d->domain_id);
okay = 0;
break;
}
case MMUEXT_REASSIGN_PAGE:
if ( unlikely(!IS_PRIV(d)) )
{
- MEM_LOG("Dom %u has no reassignment priv", d->id);
+ MEM_LOG("Dom %u has no reassignment priv", d->domain_id);
okay = 0;
break;
}
* it is dying.
*/
ASSERT(e->tot_pages <= e->max_pages);
- if ( unlikely(test_bit(DF_DYING, &e->flags)) ||
+ if ( unlikely(test_bit(_DOMF_dying, &e->domain_flags)) ||
unlikely(e->tot_pages == e->max_pages) ||
unlikely(IS_XEN_HEAP_FRAME(page)) )
{
MEM_LOG("Transferee has no reservation headroom (%d,%d), or "
"page is in Xen heap (%lx), or dom is dying (%ld).\n",
- e->tot_pages, e->max_pages, op.mfn, e->flags);
+ e->tot_pages, e->max_pages, op.mfn, e->domain_flags);
okay = 0;
goto reassign_fail;
}
{
MEM_LOG("Bad page values %lx: ed=%p(%u), sd=%p,"
" caf=%08x, taf=%08x\n", page_to_pfn(page),
- d, d->id, unpickle_domptr(_nd), x,
+ d, d->domain_id, unpickle_domptr(_nd), x,
page->u.inuse.type_info);
okay = 0;
goto reassign_fail;
{
shadow_lock(FOREIGNDOM);
printk("privileged guest dom%d requests pfn=%lx to map mfn=%lx for dom%d\n",
- d->id, gpfn, mfn, FOREIGNDOM->id);
+ d->domain_id, gpfn, mfn, FOREIGNDOM->domain_id);
set_machinetophys(mfn, gpfn);
set_p2m_entry(FOREIGNDOM, gpfn, mfn, &sh_mapcache, &mapcache);
okay = 1;
int modified = 0, i;
#if 0
- if ( d->id )
+ if ( d->domain_id )
printk("%s: l1page mfn=%lx snapshot mfn=%lx\n", __func__,
l1e_get_pfn(linear_pg_table[l1_linear_offset((unsigned long)l1page)]),
l1e_get_pfn(linear_pg_table[l1_linear_offset((unsigned long)snapshot)]));
{
MEM_LOG("Bad page values %p: ed=%p(%u), sd=%p,"
" caf=%08x, taf=%08x\n", page_to_pfn(page),
- d, d->id, unpickle_domptr(_nd), x,
+ d, d->domain_id, unpickle_domptr(_nd), x,
page->u.inuse.type_info);
spin_unlock(&d->page_alloc_lock);
put_domain(e);
* Also, a domain mustn't have PGC_allocated pages when it is dying.
*/
ASSERT(e->tot_pages <= e->max_pages);
- if ( unlikely(test_bit(DF_DYING, &e->flags)) ||
+ if ( unlikely(test_bit(_DOMF_dying, &e->domain_flags)) ||
unlikely(e->tot_pages == e->max_pages) ||
unlikely(!gnttab_prepare_for_transfer(e, d, gntref)) )
{
BUG_ON(d->arch.iobmp_mask == NULL);
memset(d->arch.iobmp_mask, 0, IOBMP_BYTES);
- set_bit(DF_PHYSDEV, &d->flags);
+ set_bit(_DOMF_physdev_access, &d->domain_flags);
}
if ( dom0 == NULL )
panic("Error creating domain 0\n");
- set_bit(DF_PRIVILEGED, &dom0->flags);
+ set_bit(_DOMF_privileged, &dom0->domain_flags);
/* Grab the DOM0 command line. */
cmdline = (char *)(mod[0].string ? __va(mod[0].string) : NULL);
{
printk("shadow_promote: get_page_type failed "
"dom%d gpfn=%lx gmfn=%lx t=%08lx\n",
- d->id, gpfn, gmfn, new_type);
+ d->domain_id, gpfn, gmfn, new_type);
okay = 0;
}
if ( unlikely(page == NULL) )
{
printk("Couldn't alloc shadow page! dom%d count=%d\n",
- d->id, d->arch.shadow_page_count);
+ d->domain_id, d->arch.shadow_page_count);
printk("Shadow table counts: l1=%d l2=%d hl2=%d snapshot=%d\n",
perfc_value(shadow_l1_pages),
perfc_value(shadow_l2_pages),
* Currently this does not fix up page ref counts, so it is valid to call
* only when a domain is being destroyed.
*/
- BUG_ON(!test_bit(DF_DYING, &d->flags) && shadow_mode_refcounts(d));
+ BUG_ON(!test_bit(_DOMF_dying, &d->domain_flags) &&
+ shadow_mode_refcounts(d));
d->arch.shadow_tainted_refcnts = shadow_mode_refcounts(d);
free_shadow_pages(d);
if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
{
printk("gpfn_to_mfn_foreign(d->id=%d, gpfn=%lx) => 0 l2e=%lx\n",
- d->id, gpfn, l2e_get_value(l2e));
+ d->domain_id, gpfn, l2e_get_value(l2e));
return INVALID_MFN;
}
unsigned long l1tab = l2e_get_phys(l2e);
#if 0
printk("gpfn_to_mfn_foreign(d->id=%d, gpfn=%lx) => %lx phystab=%lx l2e=%lx l1tab=%lx, l1e=%lx\n",
- d->id, gpfn, l1_pgentry_val(l1e) >> PAGE_SHIFT, phystab, l2e, l1tab, l1e);
+ d->domain_id, gpfn, l1_pgentry_val(l1e) >> PAGE_SHIFT, phystab, l2e, l1tab, l1e);
#endif
if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
{
printk("gpfn_to_mfn_foreign(d->id=%d, gpfn=%lx) => 0 l1e=%lx\n",
- d->id, gpfn, l1e_get_value(l1e));
+ d->domain_id, gpfn, l1e_get_value(l1e));
return INVALID_MFN;
}
{
printk("Couldn't alloc fullshadow snapshot for pfn=%lx mfn=%lx!\n"
"Dom%d snapshot_count_count=%d\n",
- gpfn, gmfn, d->id, d->arch.snapshot_page_count);
+ gpfn, gmfn, d->domain_id, d->arch.snapshot_page_count);
BUG(); /* XXX FIXME: try a shadow flush to free up some memory. */
}
{
printk("%s() failed, crashing domain %d "
"due to a read-only L2 page table (gpde=%lx), va=%lx\n",
- __func__, d->id, l2e_get_value(gpde), va);
+ __func__, d->domain_id, l2e_get_value(gpde), va);
domain_crash_synchronous();
}
FAILPT("bogus owner for snapshot page");
if ( page_get_owner(pfn_to_page(smfn)) != NULL )
FAILPT("shadow page mfn=0x%lx is owned by someone, domid=%d",
- smfn, page_get_owner(pfn_to_page(smfn))->id);
+ smfn, page_get_owner(pfn_to_page(smfn))->domain_id);
#if 0
if ( memcmp(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
shadow_status_noswap = 1;
sh_check_name = s;
- SH_VVLOG("%s-PT Audit domid=%d", s, d->id);
+ SH_VVLOG("%s-PT Audit domid=%d", s, d->domain_id);
sh_l2_present = sh_l1_present = 0;
perfc_incrc(check_all_pagetables);
ed = idle->exec_domain[0];
- set_bit(DF_IDLETASK, &idle->flags);
+ set_bit(_DOMF_idle_domain, &idle->domain_flags);
ed->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
#ifndef NDEBUG
if ( (ed->arch.guest_context.trap_ctxt[trapnr].address == 0) &&
- (ed->domain->id == 0) )
+ (ed->domain->domain_id == 0) )
goto xen_fault;
#endif
#ifndef NDEBUG
if ( (ed->arch.guest_context.trap_ctxt[TRAP_page_fault].address == 0) &&
- (d->id == 0) )
+ (d->domain_id == 0) )
goto xen_fault;
#endif
if ( set )
{
- set_bit(EDF_GUEST_STTS, &ed->flags);
+ set_bit(_VCPUF_guest_stts, &ed->vcpu_flags);
stts();
}
else
{
- clear_bit(EDF_GUEST_STTS, &ed->flags);
- if ( test_bit(EDF_USEDFPU, &ed->flags) )
+ clear_bit(_VCPUF_guest_stts, &ed->vcpu_flags);
+ if ( test_bit(_VCPUF_fpu_dirtied, &ed->vcpu_flags) )
clts();
}
case 0: /* Read CR0 */
*reg =
(read_cr0() & ~X86_CR0_TS) |
- (test_bit(EDF_GUEST_STTS, &ed->flags) ? X86_CR0_TS : 0);
+ (test_bit(_VCPUF_guest_stts, &ed->vcpu_flags) ? X86_CR0_TS:0);
break;
case 2: /* Read CR2 */
#ifndef NDEBUG
if ( (ed->arch.guest_context.trap_ctxt[TRAP_gp_fault].address == 0) &&
- (ed->domain->id == 0) )
+ (ed->domain->domain_id == 0) )
goto gp_in_kernel;
#endif
setup_fpu(current);
- if ( test_and_clear_bit(EDF_GUEST_STTS, ¤t->flags) )
+ if ( test_and_clear_bit(_VCPUF_guest_stts, ¤t->vcpu_flags) )
{
struct trap_bounce *tb = ¤t->arch.trap_bounce;
tb->flags = TBF_EXCEPTION;
print_buf[index++] = c;
}
print_buf[index] = '\0';
- printk("(GUEST: %u) %s\n", d->domain->id, (char *) &print_buf);
+ printk("(GUEST: %u) %s\n", d->domain->domain_id, (char *) &print_buf);
index = 0;
}
else
}
__vmread(GUEST_EIP, &eip);
- TRACE_3D(TRC_VMX_VMEXIT, ed->domain->id, eip, exit_reason);
+ TRACE_3D(TRC_VMX_VMEXIT, ed->domain->domain_id, eip, exit_reason);
switch (exit_reason) {
case EXIT_REASON_EXCEPTION_NMI:
perfc_incra(cause_vector, vector);
- TRACE_3D(TRC_VMX_VECTOR, ed->domain->id, eip, vector);
+ TRACE_3D(TRC_VMX_VECTOR, ed->domain->domain_id, eip, vector);
switch (vector) {
#ifdef XEN_DEBUGGER
case TRAP_debug:
__vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
__vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, regs.error_code);
ed->arch.arch_vmx.cpu_cr2 = va;
- TRACE_3D(TRC_VMX_INT, ed->domain->id, TRAP_page_fault, va);
+ TRACE_3D(TRC_VMX_INT, ed->domain->domain_id, TRAP_page_fault, va);
}
break;
}
struct cpu_user_regs *regs = get_cpu_user_regs();
vmx_stts();
- set_bit(EDF_GUEST_STTS, &ed->flags);
+ set_bit(_VCPUF_guest_stts, &ed->vcpu_flags);
cpu = smp_processor_id();
{
ret = -EINVAL;
if ( (d != current->domain) &&
- test_bit(DF_CONSTRUCTED, &d->flags) )
+ test_bit(_DOMF_constructed, &d->domain_flags) )
{
domain_unpause_by_systemcontroller(d);
ret = 0;
ret = 0;
- op->u.createdomain.domain = d->id;
+ op->u.createdomain.domain = d->domain_id;
copy_to_user(u_dom0_op, op, sizeof(*op));
}
break;
ed->cpumap = cpumap;
if ( cpumap == CPUMAP_RUNANYWHERE )
- clear_bit(EDF_CPUPINNED, &ed->flags);
+ clear_bit(_VCPUF_cpu_pinned, &ed->vcpu_flags);
else
{
/* pick a new cpu from the usable map */
exec_domain_pause(ed);
if ( ed->processor != new_cpu )
- set_bit(EDF_MIGRATED, &ed->flags);
- set_bit(EDF_CPUPINNED, &ed->flags);
+ set_bit(_VCPUF_cpu_migrated, &ed->vcpu_flags);
+ set_bit(_VCPUF_cpu_pinned, &ed->vcpu_flags);
ed->processor = new_cpu;
exec_domain_unpause(ed);
}
for_each_domain ( d )
{
- if ( d->id >= op->u.getdomaininfo.domain )
+ if ( d->domain_id >= op->u.getdomaininfo.domain )
break;
}
read_unlock(&domlist_lock);
- op->u.getdomaininfo.domain = d->id;
+ op->u.getdomaininfo.domain = d->domain_id;
memset(&op->u.getdomaininfo.vcpu_to_cpu, -1,
sizeof(op->u.getdomaininfo.vcpu_to_cpu));
* - domain is marked as running if any of its vcpus is running
*/
for_each_exec_domain ( d, ed ) {
- op->u.getdomaininfo.vcpu_to_cpu[ed->id] = ed->processor;
- op->u.getdomaininfo.cpumap[ed->id] = ed->cpumap;
- if (!test_bit(EDF_CTRLPAUSE, &ed->flags))
+ op->u.getdomaininfo.vcpu_to_cpu[ed->vcpu_id] = ed->processor;
+ op->u.getdomaininfo.cpumap[ed->vcpu_id] = ed->cpumap;
+ if ( !(ed->vcpu_flags & VCPUF_ctrl_pause) )
flags &= ~DOMFLAGS_PAUSED;
- if (!test_bit(EDF_BLOCKED, &ed->flags))
+ if ( !(ed->vcpu_flags & VCPUF_blocked) )
flags &= ~DOMFLAGS_BLOCKED;
- if (test_bit(EDF_RUNNING, &ed->flags))
+ if ( ed->vcpu_flags & VCPUF_running )
flags |= DOMFLAGS_RUNNING;
if ( ed->cpu_time > cpu_time )
cpu_time += ed->cpu_time;
op->u.getdomaininfo.n_vcpu = vcpu_count;
op->u.getdomaininfo.flags = flags |
- (test_bit( DF_DYING, &d->flags) ? DOMFLAGS_DYING : 0) |
- (test_bit( DF_CRASHED, &d->flags) ? DOMFLAGS_CRASHED : 0) |
- (test_bit( DF_SHUTDOWN, &d->flags) ? DOMFLAGS_SHUTDOWN : 0) |
+ ((d->domain_flags & DOMF_dying) ? DOMFLAGS_DYING : 0) |
+ ((d->domain_flags & DOMF_crashed) ? DOMFLAGS_CRASHED : 0) |
+ ((d->domain_flags & DOMF_shutdown) ? DOMFLAGS_SHUTDOWN : 0) |
d->shutdown_code << DOMFLAGS_SHUTDOWNSHIFT;
op->u.getdomaininfo.tot_pages = d->tot_pages;
__HYPERVISOR_dom_mem_op, \
(_op) | (i << START_EXTENT_SHIFT), \
extent_list, nr_extents, extent_order, \
- (d == current->domain) ? DOMID_SELF : d->id);
+ (d == current->domain) ? DOMID_SELF : d->domain_id);
static long
alloc_dom_mem(struct domain *d,
if ( unlikely((mpfn + j) >= max_page) )
{
DPRINTK("Domain %u page number out of range (%lx >= %lx)\n",
- d->id, mpfn + j, max_page);
+ d->domain_id, mpfn + j, max_page);
return i;
}
page = &frame_table[mpfn + j];
if ( unlikely(!get_page(page, d)) )
{
- DPRINTK("Bad page free for domain %u\n", d->id);
+ DPRINTK("Bad page free for domain %u\n", d->domain_id);
return i;
}
atomic_set(&d->refcnt, 1);
atomic_set(&ed->pausecnt, 0);
- d->id = dom_id;
+ d->domain_id = dom_id;
ed->processor = cpu;
spin_lock_init(&d->time_lock);
INIT_LIST_HEAD(&d->page_list);
INIT_LIST_HEAD(&d->xenpage_list);
- if ( (d->id != IDLE_DOMAIN_ID) &&
+ if ( (d->domain_id != IDLE_DOMAIN_ID) &&
((init_event_channels(d) != 0) || (grant_table_create(d) != 0)) )
{
destroy_event_channels(d);
sched_add_domain(ed);
- if ( d->id != IDLE_DOMAIN_ID )
+ if ( d->domain_id != IDLE_DOMAIN_ID )
{
write_lock(&domlist_lock);
pd = &domain_list; /* NB. domain_list maintained in order of dom_id. */
for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
- if ( (*pd)->id > d->id )
+ if ( (*pd)->domain_id > d->domain_id )
break;
d->next_in_list = *pd;
*pd = d;
d = domain_hash[DOMAIN_HASH(dom)];
while ( d != NULL )
{
- if ( d->id == dom )
+ if ( d->domain_id == dom )
{
if ( unlikely(!get_domain(d)) )
d = NULL;
struct exec_domain *ed;
domain_pause(d);
- if ( !test_and_set_bit(DF_DYING, &d->flags) )
+ if ( !test_and_set_bit(_DOMF_dying, &d->domain_flags) )
{
for_each_exec_domain(d, ed)
sched_rem_domain(ed);
{
struct domain *d = current->domain;
- if ( d->id == 0 )
+ if ( d->domain_id == 0 )
BUG();
- set_bit(DF_CRASHED, &d->flags);
+ set_bit(_DOMF_crashed, &d->domain_flags);
send_guest_virq(dom0->exec_domain[0], VIRQ_DOM_EXC);
{
struct domain *d = current->domain;
- if ( d->id == 0 )
+ if ( d->domain_id == 0 )
{
extern void machine_restart(char *);
extern void machine_halt(void);
}
if ( (d->shutdown_code = reason) == SHUTDOWN_crash )
- set_bit(DF_CRASHED, &d->flags);
+ set_bit(_DOMF_crashed, &d->domain_flags);
else
- set_bit(DF_SHUTDOWN, &d->flags);
+ set_bit(_DOMF_shutdown, &d->domain_flags);
send_guest_virq(dom0->exec_domain[0], VIRQ_DOM_EXC);
struct domain **pd;
atomic_t old, new;
- if ( !test_bit(DF_DYING, &d->flags) )
+ if ( !test_bit(_DOMF_dying, &d->domain_flags) )
BUG();
/* May be already destructed, or get_domain() can race us. */
while ( *pd != d )
pd = &(*pd)->next_in_list;
*pd = d->next_in_list;
- pd = &domain_hash[DOMAIN_HASH(d->id)];
+ pd = &domain_hash[DOMAIN_HASH(d->domain_id)];
while ( *pd != d )
pd = &(*pd)->next_in_hashbucket;
*pd = d->next_in_hashbucket;
* of domains other than domain 0. ie. the domains that are being built by
* the userspace dom0 domain builder.
*/
-int set_info_guest(struct domain *p, dom0_setdomaininfo_t *setdomaininfo)
+int set_info_guest(struct domain *d, dom0_setdomaininfo_t *setdomaininfo)
{
int rc = 0;
struct vcpu_guest_context *c = NULL;
unsigned long vcpu = setdomaininfo->exec_domain;
struct exec_domain *ed;
- if ( (vcpu >= MAX_VIRT_CPUS) || ((ed = p->exec_domain[vcpu]) == NULL) )
+ if ( (vcpu >= MAX_VIRT_CPUS) || ((ed = d->exec_domain[vcpu]) == NULL) )
return -EINVAL;
- if (test_bit(DF_CONSTRUCTED, &p->flags) &&
- !test_bit(EDF_CTRLPAUSE, &ed->flags))
+ if (test_bit(_DOMF_constructed, &d->domain_flags) &&
+ !test_bit(_VCPUF_ctrl_pause, &ed->vcpu_flags))
return -EINVAL;
if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
if ( (rc = arch_set_info_guest(ed, c)) != 0 )
goto out;
- set_bit(DF_CONSTRUCTED, &p->flags);
+ set_bit(_DOMF_constructed, &d->domain_flags);
out:
xfree(c);
sched_add_domain(ed);
/* domain_unpause_by_systemcontroller */
- if ( test_and_clear_bit(EDF_CTRLPAUSE, &ed->flags) )
+ if ( test_and_clear_bit(_VCPUF_ctrl_pause, &ed->vcpu_flags) )
domain_wake(ed);
xfree(c);
max = d->max_event_channel;
chn = d->event_channel;
- for ( port = ed->id * EVENT_CHANNELS_SPREAD; port < max; port++ )
+ for ( port = ed->vcpu_id * EVENT_CHANNELS_SPREAD; port < max; port++ )
if ( chn[port].state == ECS_FREE )
break;
return -EINVAL;
if ( dom1 == DOMID_SELF )
- dom1 = current->domain->id;
+ dom1 = current->domain->domain_id;
if ( dom2 == DOMID_SELF )
- dom2 = current->domain->id;
+ dom2 = current->domain->domain_id;
if ( ((d1 = find_domain_by_id(dom1)) == NULL) ||
((d2 = find_domain_by_id(dom2)) == NULL) )
BUG();
chn2[port2].state = ECS_UNBOUND;
- chn2[port2].u.unbound.remote_domid = d1->id;
+ chn2[port2].u.unbound.remote_domid = d1->domain_id;
break;
default:
domid_t dom = close->dom;
if ( dom == DOMID_SELF )
- dom = current->domain->id;
+ dom = current->domain->domain_id;
else if ( !IS_PRIV(current->domain) )
return -EPERM;
long rc = 0;
if ( dom == DOMID_SELF )
- dom = current->domain->id;
+ dom = current->domain->domain_id;
else if ( !IS_PRIV(current->domain) )
return -EPERM;
case ECS_INTERDOMAIN:
status->status = EVTCHNSTAT_interdomain;
status->u.interdomain.dom =
- chn[port].u.interdomain.remote_dom->domain->id;
+ chn[port].u.interdomain.remote_dom->domain->domain_id;
status->u.interdomain.port = chn[port].u.interdomain.remote_port;
break;
case ECS_PIRQ:
u32 scombo, prev_scombo, new_scombo;
if ( unlikely((sflags & GTF_type_mask) != GTF_permit_access) ||
- unlikely(sdom != mapping_d->id) )
+ unlikely(sdom != mapping_d->domain_id) )
PIN_FAIL(unlock_out, GNTST_general_error,
"Bad flags (%x) or dom (%d). (NB. expected dom %d)\n",
- sflags, sdom, mapping_d->id);
+ sflags, sdom, mapping_d->domain_id);
/* Merge two 16-bit values into a 32-bit combined update. */
/* NB. Endianness! */
if ( op.dom == DOMID_SELF )
{
- op.dom = current->domain->id;
+ op.dom = current->domain->domain_id;
}
else if ( unlikely(!IS_PRIV(current->domain)) )
{
if ( op.dom == DOMID_SELF )
{
- op.dom = current->domain->id;
+ op.dom = current->domain->domain_id;
}
if ( unlikely((d = find_domain_by_id(op.dom)) == NULL) )
lgt = ld->grant_table;
#if GRANT_DEBUG_VERBOSE
- if ( ld->id != 0 )
+ if ( ld->domain_id != 0 )
{
DPRINTK("Foreign unref rd(%d) ld(%d) frm(%x) flgs(%x).\n",
- rd->id, ld->id, frame, readonly);
+ rd->domain_id, ld->domain_id, frame, readonly);
}
#endif
if ( get_domain(rd) == 0 )
{
- DPRINTK("gnttab_check_unmap: couldn't get_domain rd(%d)\n", rd->id);
+ DPRINTK("gnttab_check_unmap: couldn't get_domain rd(%d)\n",
+ rd->domain_id);
return 0;
}
/* gotcha */
DPRINTK("Grant unref rd(%d) ld(%d) frm(%lx) flgs(%x).\n",
- rd->id, ld->id, frame, readonly);
+ rd->domain_id, ld->domain_id, frame, readonly);
if ( readonly )
act->pin -= GNTPIN_hstr_inc;
unsigned long target_pfn;
DPRINTK("gnttab_prepare_for_transfer rd(%hu) ld(%hu) ref(%hu).\n",
- rd->id, ld->id, ref);
+ rd->domain_id, ld->domain_id, ref);
if ( unlikely((rgt = rd->grant_table) == NULL) ||
unlikely(ref >= NR_GRANT_ENTRIES) )
{
- DPRINTK("Dom %d has no g.t., or ref is bad (%d).\n", rd->id, ref);
+ DPRINTK("Dom %d has no g.t., or ref is bad (%d).\n",
+ rd->domain_id, ref);
return 0;
}
}
if ( unlikely(sflags != GTF_accept_transfer) ||
- unlikely(sdom != ld->id) )
+ unlikely(sdom != ld->domain_id) )
{
DPRINTK("Bad flags (%x) or dom (%d). (NB. expected dom %d)\n",
- sflags, sdom, ld->id);
+ sflags, sdom, ld->domain_id);
goto fail;
}
unsigned long pfn;
DPRINTK("gnttab_notify_transfer rd(%hu) ld(%hu) ref(%hu).\n",
- rd->id, ld->id, ref);
+ rd->domain_id, ld->domain_id, ref);
sha = &rd->grant_table->shared[ref];
__phys_to_machine_mapping[pfn] = frame;
}
sha->frame = __mfn_to_gpfn(rd, frame);
- sha->domid = rd->id;
+ sha->domid = rd->domain_id;
wmb();
sha->flags = ( GTF_accept_transfer | GTF_transfer_completed );
for_each_domain ( d )
{
printk("Xen: DOM %u, flags=%lx refcnt=%d nr_pages=%d "
- "xenheap_pages=%d\n", d->id, d->flags,
+ "xenheap_pages=%d\n", d->domain_id, d->domain_flags,
atomic_read(&d->refcnt), d->tot_pages, d->xenheap_pages);
dump_pageframe_info(d);
printk("Guest: %p CPU %d [has=%c] flags=%lx "
"upcall_pend = %02x, upcall_mask = %02x\n", ed,
ed->processor,
- test_bit(EDF_RUNNING, &ed->flags) ? 'T':'F',
- ed->flags,
+ test_bit(_VCPUF_running, &ed->vcpu_flags) ? 'T':'F',
+ ed->vcpu_flags,
ed->vcpu_info->evtchn_upcall_pending,
ed->vcpu_info->evtchn_upcall_mask);
- printk("Notifying guest... %d/%d\n", d->id, ed->id);
+ printk("Notifying guest... %d/%d\n", d->domain_id, ed->vcpu_id);
printk("port %d/%d stat %d %d %d\n",
VIRQ_DEBUG, ed->virq_to_evtchn[VIRQ_DEBUG],
test_bit(ed->virq_to_evtchn[VIRQ_DEBUG],
spin_lock(&d->page_alloc_lock);
- if ( unlikely(test_bit(DF_DYING, &d->flags)) ||
+ if ( unlikely(test_bit(_DOMF_dying, &d->domain_flags)) ||
unlikely((d->tot_pages + (1 << order)) > d->max_pages) )
{
DPRINTK("Over-allocation for domain %u: %u > %u\n",
- d->id, d->tot_pages + (1 << order), d->max_pages);
+ d->domain_id, d->tot_pages + (1 << order), d->max_pages);
DPRINTK("...or the domain is dying (%d)\n",
- !!test_bit(DF_DYING, &d->flags));
+ !!test_bit(_DOMF_dying, &d->domain_flags));
spin_unlock(&d->page_alloc_lock);
free_heap_pages(MEMZONE_DOM, pg, order);
return NULL;
spin_unlock_recursive(&d->page_alloc_lock);
- if ( likely(!test_bit(DF_DYING, &d->flags)) )
+ if ( likely(!test_bit(_DOMF_dying, &d->domain_flags)) )
{
free_heap_pages(MEMZONE_DOM, pg, order);
}
return -1;
memset(d->sched_priv, 0, sizeof(struct bvt_dom_info));
}
- ed->sched_priv = &BVT_INFO(d)->ed_inf[ed->id];
- BVT_INFO(d)->ed_inf[ed->id].inf = BVT_INFO(d);
- BVT_INFO(d)->ed_inf[ed->id].exec_domain = ed;
+ ed->sched_priv = &BVT_INFO(d)->ed_inf[ed->vcpu_id];
+ BVT_INFO(d)->ed_inf[ed->vcpu_id].inf = BVT_INFO(d);
+ BVT_INFO(d)->ed_inf[ed->vcpu_id].exec_domain = ed;
return 0;
}
ASSERT(inf != NULL);
ASSERT(d != NULL);
- if (d->id == 0) {
+ if ( d->vcpu_id == 0 )
+ {
inf->mcu_advance = MCU_ADVANCE;
inf->domain = d->domain;
inf->warpback = 0;
einf->exec_domain = d;
- if ( d->domain->id == IDLE_DOMAIN_ID )
+ if ( d->domain->domain_id == IDLE_DOMAIN_ID )
{
einf->avt = einf->evt = ~0U;
}
bvt_add_task(ed);
- set_bit(EDF_RUNNING, &ed->flags);
+ set_bit(_VCPUF_running, &ed->vcpu_flags);
if ( !__task_on_runqueue(ed) )
__add_to_runqueue_head(ed);
/* Set the BVT parameters. AVT should always be updated
if CPU migration ocurred.*/
if ( einf->avt < CPU_SVT(cpu) ||
- unlikely(test_bit(EDF_MIGRATED, &ed->flags)) )
+ unlikely(test_bit(_VCPUF_cpu_migrated, &ed->vcpu_flags)) )
einf->avt = CPU_SVT(cpu);
/* Deal with warping here. */
static void bvt_sleep(struct exec_domain *ed)
{
- if ( test_bit(EDF_RUNNING, &ed->flags) )
+ if ( test_bit(_VCPUF_running, &ed->vcpu_flags) )
cpu_raise_softirq(ed->processor, SCHEDULE_SOFTIRQ);
else if ( __task_on_runqueue(ed) )
__del_from_runqueue(ed);
list_for_each_entry ( ed_inf, queue, run_list )
{
ed = ed_inf->exec_domain;
- printk("%3d: %u has=%c ", loop++, ed->domain->id,
- test_bit(EDF_RUNNING, &ed->flags) ? 'T':'F');
+ printk("%3d: %u has=%c ", loop++, ed->domain->domain_id,
+ test_bit(_VCPUF_running, &ed->vcpu_flags) ? 'T':'F');
bvt_dump_runq_el(ed);
printk("c=0x%X%08X\n", (u32)(ed->cpu_time>>32), (u32)ed->cpu_time);
printk(" l: %p n: %p p: %p\n",
{
struct list_head *list = EXTRALIST(d,i);
ASSERT(extraq_on(d,i));
- PRINT(3, "Removing domain %i.%i from L%i extraq\n", d->domain->id,
- d->id, i);
+ PRINT(3, "Removing domain %i.%i from L%i extraq\n", d->domain->domain_id,
+ d->vcpu_id, i);
list_del(list);
list->next = NULL;
ASSERT(!extraq_on(d, i));
ASSERT(!extraq_on(d,i));
PRINT(3, "Adding domain %i.%i (score= %i, short_pen= %"PRIi64")"
" to L%i extraq\n",
- d->domain->id, d->id, EDOM_INFO(d)->score[i],
+ d->domain->domain_id, d->vcpu_id, EDOM_INFO(d)->score[i],
EDOM_INFO(d)->short_block_lost_tot, i);
/*iterate through all elements to find our "hole" and on our way
update all the other scores*/
break;
else
PRINT(4,"\tbehind domain %i.%i (score= %i)\n",
- curinf->exec_domain->domain->id,
- curinf->exec_domain->id, curinf->score[i]);
+ curinf->exec_domain->domain->domain_id,
+ curinf->exec_domain->vcpu_id, curinf->score[i]);
}
/*cur now contains the element, before which we'll enqueue*/
PRINT(3, "\tlist_add to %p\n", cur->prev);
extralist[i]);
curinf->score[i] -= sub;
PRINT(4, "\tupdating domain %i.%i (score= %u)\n",
- curinf->exec_domain->domain->id,
- curinf->exec_domain->id, curinf->score[i]);
+ curinf->exec_domain->domain->domain_id,
+ curinf->exec_domain->vcpu_id, curinf->score[i]);
}
ASSERT(extraq_on(d,i));
}
static inline void extraq_check(struct exec_domain *d) {
if (extraq_on(d, EXTRA_UTIL_Q)) {
- PRINT(2,"Dom %i.%i is on L1 extraQ\n",d->domain->id, d->id);
+ PRINT(2,"Dom %i.%i is on L1 extraQ\n",d->domain->domain_id, d->vcpu_id);
if (!(EDOM_INFO(d)->status & EXTRA_AWARE) &&
!extra_runs(EDOM_INFO(d))) {
extraq_del(d, EXTRA_UTIL_Q);
PRINT(2,"Removed dom %i.%i from L1 extraQ\n",
- d->domain->id, d->id);
+ d->domain->domain_id, d->vcpu_id);
}
} else {
- PRINT(2,"Dom %i.%i is NOT on L1 extraQ\n",d->domain->id,
- d->id);
+ PRINT(2,"Dom %i.%i is NOT on L1 extraQ\n",d->domain->domain_id,
+ d->vcpu_id);
if ((EDOM_INFO(d)->status & EXTRA_AWARE) && sedf_runnable(d))
{
#if (EXTRA == EXTRA_ROUNDR)
#elif
;
#endif
- PRINT(2,"Added dom %i.%i to L1 extraQ\n",d->domain->id,
- d->id);
+ PRINT(2,"Added dom %i.%i to L1 extraQ\n",d->domain->domain_id,
+ d->vcpu_id);
}
}
}
struct list_head *list = LIST(d);
ASSERT(__task_on_queue(d));
PRINT(3,"Removing domain %i.%i (bop= %"PRIu64") from runq/waitq\n",
- d->domain->id, d->id, PERIOD_BEGIN(EDOM_INFO(d)));
+ d->domain->domain_id, d->vcpu_id, PERIOD_BEGIN(EDOM_INFO(d)));
list_del(list);
list->next = NULL;
ASSERT(!__task_on_queue(d));
static inline void __add_to_waitqueue_sort(struct exec_domain *d) {
ASSERT(!__task_on_queue(d));
PRINT(3,"Adding domain %i.%i (bop= %"PRIu64") to waitq\n",
- d->domain->id, d->id, PERIOD_BEGIN(EDOM_INFO(d)));
+ d->domain->domain_id, d->vcpu_id, PERIOD_BEGIN(EDOM_INFO(d)));
list_insert_sort(WAITQ(d->processor), LIST(d), waitq_comp);
ASSERT(__task_on_queue(d));
}
DOMAIN_COMPARER(runq, list, d1->deadl_abs, d2->deadl_abs)
static inline void __add_to_runqueue_sort(struct exec_domain *d) {
PRINT(3,"Adding domain %i.%i (deadl= %"PRIu64") to runq\n",
- d->domain->id, d->id, EDOM_INFO(d)->deadl_abs);
+ d->domain->domain_id, d->vcpu_id, EDOM_INFO(d)->deadl_abs);
list_insert_sort(RUNQ(d->processor), LIST(d), runq_comp);
}
/* Allocates memory for per domain private scheduling data*/
static int sedf_alloc_task(struct exec_domain *d) {
- PRINT(2,"sedf_alloc_task was called, domain-id %i.%i\n",d->domain->id,
- d->id);
+ PRINT(2,"sedf_alloc_task was called, domain-id %i.%i\n",
+ d->domain->domain_id, d->vcpu_id);
if (d->domain->sched_priv == NULL) {
if ((d->domain->sched_priv =
xmalloc(struct sedf_dom_info)) == NULL )
struct sedf_edom_info *inf = EDOM_INFO(d);
inf->exec_domain = d;
- PRINT(2,"sedf_add_task was called, domain-id %i.%i\n",d->domain->id,
- d->id);
+ PRINT(2,"sedf_add_task was called, domain-id %i.%i\n",
+ d->domain->domain_id, d->vcpu_id);
- if (d->domain->id==0) {
+ if (d->domain->domain_id==0) {
/*set dom0 to something useful to boot the machine*/
inf->period = MILLISECS(20);
inf->slice = MILLISECS(15);
INIT_LIST_HEAD(&(inf->extralist[EXTRA_PEN_Q]));
INIT_LIST_HEAD(&(inf->extralist[EXTRA_UTIL_Q]));
- if (d->domain->id != IDLE_DOMAIN_ID) {
+ if (d->domain->domain_id != IDLE_DOMAIN_ID) {
extraq_check(d);
}
}
static void sedf_free_task(struct domain *d)
{
int i;
- PRINT(2,"sedf_free_task was called, domain-id %i\n",d->id);
+ PRINT(2,"sedf_free_task was called, domain-id %i\n",d->domain_id);
ASSERT(d->sched_priv != NULL);
xfree(d->sched_priv);
/* Initialises idle task */
static int sedf_init_idle_task(struct exec_domain *d) {
PRINT(2,"sedf_init_idle_task was called, domain-id %i.%i\n",
- d->domain->id, d->id);
+ d->domain->domain_id, d->vcpu_id);
if ( sedf_alloc_task(d) < 0 )
return -1;
sedf_add_task(d);
EDOM_INFO(d)->deadl_abs = 0;
EDOM_INFO(d)->status &= ~SEDF_ASLEEP;
- set_bit(EDF_RUNNING, &d->flags);
+ set_bit(_VCPUF_running, &d->vcpu_flags);
/*the idle task doesn't have to turn up on any list...*/
return 0;
}
list_for_each_safe(cur, tmp, waitq) {
curinf = list_entry(cur, struct sedf_edom_info, list);
PRINT(4,"\tLooking @ dom %i.%i\n",
- curinf->exec_domain->domain->id, curinf->exec_domain->id);
+ curinf->exec_domain->domain->domain_id,
+ curinf->exec_domain->vcpu_id);
if (PERIOD_BEGIN(curinf) <= now) {
__del_from_queue(curinf->exec_domain);
__add_to_runqueue_sort(curinf->exec_domain);
list_for_each_safe(cur, tmp, runq) {
curinf = list_entry(cur,struct sedf_edom_info,list);
PRINT(4,"\tLooking @ dom %i.%i\n",
- curinf->exec_domain->domain->id, curinf->exec_domain->id);
+ curinf->exec_domain->domain->domain_id,
+ curinf->exec_domain->vcpu_id);
if (unlikely(curinf->slice == 0)) {
/*ignore domains with empty slice*/
PRINT(4,"\tUpdating zero-slice domain %i.%i\n",
- curinf->exec_domain->domain->id,
- curinf->exec_domain->id);
+ curinf->exec_domain->domain->domain_id,
+ curinf->exec_domain->vcpu_id);
__del_from_queue(curinf->exec_domain);
/*move them to their next period*/
PRINT(4,"\tDomain %i.%i exceeded it's deadline/"
"slice (%"PRIu64" / %"PRIu64") now: %"PRIu64
" cputime: %"PRIu64"\n",
- curinf->exec_domain->domain->id,
- curinf->exec_domain->id,
+ curinf->exec_domain->domain->domain_id,
+ curinf->exec_domain->vcpu_id,
curinf->deadl_abs, curinf->slice, now,
curinf->cputime);
__del_from_queue(curinf->exec_domain);
/*inf->short_block_lost_tot -= EXTRA_QUANTUM;*/
inf->short_block_lost_tot -= now - inf->sched_start_abs;
PRINT(3,"Domain %i.%i: Short_block_loss: %"PRIi64"\n",
- inf->exec_domain->domain->id, inf->exec_domain->id,
+ inf->exec_domain->domain->domain_id,
+ inf->exec_domain->vcpu_id,
inf->short_block_lost_tot);
if (inf->short_block_lost_tot <= 0) {
PRINT(4,"Domain %i.%i compensated short block loss!\n",
- inf->exec_domain->domain->id, inf->exec_domain->id);
+ inf->exec_domain->domain->domain_id,
+ inf->exec_domain->vcpu_id);
/*we have (over-)compensated our block penalty*/
inf->short_block_lost_tot = 0;
/*we don't want a place on the penalty queue anymore!*/
}
static void sedf_sleep(struct exec_domain *d) {
- PRINT(2,"sedf_sleep was called, domain-id %i.%i\n",d->domain->id, d->id);
+ PRINT(2,"sedf_sleep was called, domain-id %i.%i\n",
+ d->domain->domain_id, d->vcpu_id);
if (is_idle_task(d->domain))
return;
EDOM_INFO(d)->status |= SEDF_ASLEEP;
- if ( test_bit(EDF_RUNNING, &d->flags) ) {
+ if ( test_bit(_VCPUF_running, &d->vcpu_flags) ) {
#ifdef ADV_SCHED_HISTO
adv_sched_hist_start(d->processor);
#endif
s_time_t now = NOW();
struct sedf_edom_info* inf = EDOM_INFO(d);
- PRINT(3, "sedf_wake was called, domain-id %i.%i\n",d->domain->id, d->id);
+ PRINT(3, "sedf_wake was called, domain-id %i.%i\n",
+ d->domain->domain_id, d->vcpu_id);
if (unlikely(is_idle_task(d->domain)))
return;
if ( unlikely(__task_on_queue(d)) ) {
PRINT(3,"\tdomain %i.%i is already in some queue\n",
- d->domain->id, d->id);
+ d->domain->domain_id, d->vcpu_id);
return;
}
ASSERT(!sedf_runnable(d));
inf->deadl_abs = now + inf->slice;
PRINT(3,"waking up domain %i.%i (deadl= %"PRIu64" period= %"PRIu64" "\
- "now= %"PRIu64")\n", d->domain->id, d->id, inf->deadl_abs,
+ "now= %"PRIu64")\n", d->domain->domain_id, d->vcpu_id,
+ inf->deadl_abs,
inf->period, now);
#ifdef SEDF_STATS
inf->block_tot++;
}
}
PRINT(3,"woke up domain %i.%i (deadl= %"PRIu64" period= %"PRIu64" "\
- "now= %"PRIu64")\n", d->domain->id, d->id, inf->deadl_abs,
+ "now= %"PRIu64")\n", d->domain->domain_id, d->vcpu_id,
+ inf->deadl_abs,
inf->period, now);
if (PERIOD_BEGIN(inf) > now) {
__add_to_waitqueue_sort(d);
/*Print a lot of use-{full, less} information about a domains in the system*/
static void sedf_dump_domain(struct exec_domain *d) {
- printk("%i.%i has=%c ", d->domain->id, d->id,
- test_bit(EDF_RUNNING, &d->flags) ? 'T':'F');
+ printk("%i.%i has=%c ", d->domain->domain_id, d->vcpu_id,
+ test_bit(_VCPUF_running, &d->vcpu_flags) ? 'T':'F');
printk("p=%"PRIu64" sl=%"PRIu64" ddl=%"PRIu64" w=%hu c=%"PRIu64" sc=%i xtr(%s)=%"PRIu64" ew=%hu",
EDOM_INFO(d)->period, EDOM_INFO(d)->slice, EDOM_INFO(d)->deadl_abs,
EDOM_INFO(d)->weight, d->cpu_time, EDOM_INFO(d)->score[EXTRA_UTIL_Q],
PRINT(2,"sedf_adjdom was called, domain-id %i new period %"PRIu64" "\
"new slice %"PRIu64"\nlatency %"PRIu64" extra:%s\n",
- p->id, cmd->u.sedf.period, cmd->u.sedf.slice,
+ p->domain_id, cmd->u.sedf.period, cmd->u.sedf.slice,
cmd->u.sedf.latency, (cmd->u.sedf.extratime)?"yes":"no");
if ( cmd->direction == SCHED_INFO_PUT )
{
d->exec_domain[vcpu] = ed;
ed->domain = d;
- ed->id = vcpu;
+ ed->vcpu_id = vcpu;
if ( SCHED_OP(alloc_task, ed) < 0 )
goto out;
if ( vcpu != 0 )
{
- ed->vcpu_info = &d->shared_info->vcpu_data[ed->id];
+ ed->vcpu_info = &d->shared_info->vcpu_data[ed->vcpu_id];
for_each_exec_domain( d, edc )
{
if ( (edc->next_in_list == NULL) ||
- (edc->next_in_list->id > vcpu) )
+ (edc->next_in_list->vcpu_id > vcpu) )
break;
}
ed->next_in_list = edc->next_in_list;
edc->next_in_list = ed;
- if (test_bit(EDF_CPUPINNED, &edc->flags)) {
+ if (test_bit(_VCPUF_cpu_pinned, &edc->vcpu_flags)) {
ed->processor = (edc->processor + 1) % smp_num_cpus;
- set_bit(EDF_CPUPINNED, &ed->flags);
+ set_bit(_VCPUF_cpu_pinned, &ed->vcpu_flags);
} else {
ed->processor = (edc->processor + 1) % smp_num_cpus; /* XXX */
}
struct domain *d = ed->domain;
/* Must be unpaused by control software to start execution. */
- set_bit(EDF_CTRLPAUSE, &ed->flags);
+ set_bit(_VCPUF_ctrl_pause, &ed->vcpu_flags);
- if ( d->id != IDLE_DOMAIN_ID )
+ if ( d->domain_id != IDLE_DOMAIN_ID )
{
/* Initialise the per-domain timer. */
init_ac_timer(&ed->timer);
}
SCHED_OP(add_task, ed);
- TRACE_2D(TRC_SCHED_DOM_ADD, d->id, ed->id);
+ TRACE_2D(TRC_SCHED_DOM_ADD, d->domain_id, ed->vcpu_id);
}
void sched_rem_domain(struct exec_domain *ed)
{
rem_ac_timer(&ed->timer);
SCHED_OP(rem_task, ed);
- TRACE_2D(TRC_SCHED_DOM_REM, ed->domain->id, ed->id);
+ TRACE_2D(TRC_SCHED_DOM_REM, ed->domain->domain_id, ed->vcpu_id);
}
void init_idle_task(void)
SCHED_OP(sleep, ed);
spin_unlock_irqrestore(&schedule_data[ed->processor].schedule_lock, flags);
- TRACE_2D(TRC_SCHED_SLEEP, ed->domain->id, ed->id);
+ TRACE_2D(TRC_SCHED_SLEEP, ed->domain->domain_id, ed->vcpu_id);
/* Synchronous. */
- while ( test_bit(EDF_RUNNING, &ed->flags) && !domain_runnable(ed) )
+ while ( test_bit(_VCPUF_running, &ed->vcpu_flags) && !domain_runnable(ed) )
cpu_relax();
}
ed->wokenup = NOW();
#endif
}
- clear_bit(EDF_MIGRATED, &ed->flags);
+ clear_bit(_VCPUF_cpu_migrated, &ed->vcpu_flags);
spin_unlock_irqrestore(&schedule_data[ed->processor].schedule_lock, flags);
- TRACE_2D(TRC_SCHED_WAKE, ed->domain->id, ed->id);
+ TRACE_2D(TRC_SCHED_WAKE, ed->domain->domain_id, ed->vcpu_id);
}
/* Block the currently-executing domain until a pertinent event occurs. */
#endif
ed->vcpu_info->evtchn_upcall_mask = 0;
- set_bit(EDF_BLOCKED, &ed->flags);
+ set_bit(_VCPUF_blocked, &ed->vcpu_flags);
/* Check for events /after/ blocking: avoids wakeup waiting race. */
if ( event_pending(ed) )
{
- clear_bit(EDF_BLOCKED, &ed->flags);
+ clear_bit(_VCPUF_blocked, &ed->vcpu_flags);
}
else
{
- TRACE_2D(TRC_SCHED_BLOCK, ed->domain->id, ed->id);
+ TRACE_2D(TRC_SCHED_BLOCK, ed->domain->domain_id, ed->vcpu_id);
__enter_scheduler();
}
adv_sched_hist_start(current->processor);
#endif
- TRACE_2D(TRC_SCHED_YIELD, current->domain->id, current->id);
+ TRACE_2D(TRC_SCHED_YIELD, current->domain->domain_id, current->vcpu_id);
__enter_scheduler();
return 0;
}
case SCHEDOP_shutdown:
{
- TRACE_3D(TRC_SCHED_SHUTDOWN, current->domain->id, current->id,
+ TRACE_3D(TRC_SCHED_SHUTDOWN, current->domain->domain_id, current->vcpu_id,
(op >> SCHEDOP_reasonshift));
domain_shutdown((u8)(op >> SCHEDOP_reasonshift));
break;
spin_unlock(&schedule_data[cpu].schedule_lock);
__clear_cpu_bits(have_lock);
- TRACE_1D(TRC_SCHED_ADJDOM, d->id);
+ TRACE_1D(TRC_SCHED_ADJDOM, d->domain_id);
put_domain(d);
return 0;
}
add_ac_timer(&schedule_data[cpu].s_timer);
/* Must be protected by the schedule_lock! */
- set_bit(EDF_RUNNING, &next->flags);
+ set_bit(_VCPUF_running, &next->vcpu_flags);
spin_unlock_irq(&schedule_data[cpu].schedule_lock);
}
TRACE_4D(TRC_SCHED_SWITCH,
- prev->domain->id, prev->id,
- next->domain->id, next->id);
+ prev->domain->domain_id, prev->vcpu_id,
+ next->domain->domain_id, next->vcpu_id);
#ifdef ADV_SCHED_HISTO
adv_sched_hist_to_stop(cpu);
#ifndef VERBOSE
/* Only domain-0 may access the emergency console. */
- if ( current->domain->id != 0 )
+ if ( current->domain->domain_id != 0 )
return -EPERM;
#endif
extern unsigned long xenheap_phys_end; /* user-configurable */
#endif
-#define GDT_VIRT_START(ed) (PERDOMAIN_VIRT_START + ((ed)->id << PDPT_VCPU_VA_SHIFT))
+#define GDT_VIRT_START(ed) (PERDOMAIN_VIRT_START + ((ed)->vcpu_id << PDPT_VCPU_VA_SHIFT))
#define GDT_VIRT_END(ed) (GDT_VIRT_START(ed) + (64*1024))
-#define LDT_VIRT_START(ed) (PERDOMAIN_VIRT_START + (64*1024) + ((ed)->id << PDPT_VCPU_VA_SHIFT))
+#define LDT_VIRT_START(ed) (PERDOMAIN_VIRT_START + (64*1024) + ((ed)->vcpu_id << PDPT_VCPU_VA_SHIFT))
#define LDT_VIRT_END(ed) (LDT_VIRT_START(ed) + (64*1024))
#define PDPT_VCPU_SHIFT 5
{
struct exec_domain *ed = current;
- if ( !KERNEL_MODE(ed, regs) || (ed->domain->id == 0) )
+ if ( !KERNEL_MODE(ed, regs) || (ed->domain->domain_id == 0) )
return 0;
switch ( vector )
{
case TRAP_int3:
case TRAP_debug:
- set_bit(EDF_CTRLPAUSE, &ed->flags);
+ set_bit(_VCPUF_ctrl_pause, &ed->vcpu_flags);
raise_softirq(SCHEDULE_SOFTIRQ);
return 1;
}
extern void restore_fpu(struct exec_domain *tsk);
#define unlazy_fpu(_tsk) do { \
- if ( test_bit(EDF_USEDFPU, &(_tsk)->flags) ) \
+ if ( test_bit(_VCPUF_fpu_dirtied, &(_tsk)->vcpu_flags) ) \
save_init_fpu(_tsk); \
} while ( 0 )
/* Make domain the FPU owner */
static inline void setup_fpu(struct exec_domain *ed)
{
- if ( !test_and_set_bit(EDF_USEDFPU, &ed->flags) )
+ if ( !test_and_set_bit(_VCPUF_fpu_dirtied, &ed->vcpu_flags) )
{
- if ( test_bit(EDF_DONEFPUINIT, &ed->flags) )
+ if ( test_bit(_VCPUF_fpu_initialised, &ed->vcpu_flags) )
restore_fpu(ed);
else
init_fpu();
#ifdef VERBOSE
#define SH_LOG(_f, _a...) \
printk("DOM%uP%u: SH_LOG(%d): " _f "\n", \
- current->domain->id , current->processor, __LINE__ , ## _a )
+ current->domain->domain_id , current->processor, __LINE__ , ## _a )
#else
#define SH_LOG(_f, _a...) ((void)0)
#endif
#if SHADOW_VERBOSE_DEBUG
#define SH_VLOG(_f, _a...) \
printk("DOM%uP%u: SH_VLOG(%d): " _f "\n", \
- current->domain->id, current->processor, __LINE__ , ## _a )
+ current->domain->domain_id, current->processor, __LINE__ , ## _a )
#else
#define SH_VLOG(_f, _a...) ((void)0)
#endif
#if SHADOW_VVERBOSE_DEBUG
#define SH_VVLOG(_f, _a...) \
printk("DOM%uP%u: SH_VVLOG(%d): " _f "\n", \
- current->domain->id, current->processor, __LINE__ , ## _a )
+ current->domain->domain_id, current->processor, __LINE__ , ## _a )
#else
#define SH_VVLOG(_f, _a...) ((void)0)
#endif
#if SHADOW_VVVERBOSE_DEBUG
#define SH_VVVLOG(_f, _a...) \
printk("DOM%uP%u: SH_VVVLOG(%d): " _f "\n", \
- current->domain->id, current->processor, __LINE__ , ## _a )
+ current->domain->domain_id, current->processor, __LINE__ , ## _a )
#else
#define SH_VVVLOG(_f, _a...) ((void)0)
#endif
#if FULLSHADOW_DEBUG
#define FSH_LOG(_f, _a...) \
printk("DOM%uP%u: FSH_LOG(%d): " _f "\n", \
- current->domain->id, current->processor, __LINE__ , ## _a )
+ current->domain->domain_id, current->processor, __LINE__ , ## _a )
#else
#define FSH_LOG(_f, _a...) ((void)0)
#endif
res = get_page_from_l1e(nl1e, owner);
printk("tried to map mfn %lx from domain %d into shadow page tables "
"of domain %d; %s\n",
- mfn, owner->id, d->id, res ? "success" : "failed");
+ mfn, owner->domain_id, d->domain_id,
+ res ? "success" : "failed");
}
if ( unlikely(!res) )
{
printk("d->id=%d gpfn=%lx gmfn=%lx stype=%lx c=%x t=%x "
"mfn_out_of_sync(gmfn)=%d mfn_is_page_table(gmfn)=%d\n",
- d->id, gpfn, gmfn, stype,
+ d->domain_id, gpfn, gmfn, stype,
frame_table[gmfn].count_info,
frame_table[gmfn].u.inuse.type_info,
mfn_out_of_sync(gmfn), mfn_is_page_table(gmfn));
set_bit(0, &ed->vcpu_info->evtchn_upcall_pending);
/*
- * NB1. 'flags' and 'processor' must be checked /after/ update of
+ * NB1. 'vcpu_flags' and 'processor' must be checked /after/ update of
* pending flag. These values may fluctuate (after all, we hold no
* locks) but the key insight is that each change will cause
* evtchn_upcall_pending to be polled.
*
- * NB2. We save DF_RUNNING across the unblock to avoid a needless
+ * NB2. We save VCPUF_running across the unblock to avoid a needless
* IPI for domains that we IPI'd to unblock.
*/
- running = test_bit(EDF_RUNNING, &ed->flags);
+ running = test_bit(_VCPUF_running, &ed->vcpu_flags);
exec_domain_unblock(ed);
if ( running )
smp_send_event_check_cpu(ed->processor);
struct exec_domain
{
- int id;
+ int vcpu_id;
int processor;
s_time_t wokenup; /* time domain got woken up */
void *sched_priv; /* scheduler-specific data */
- unsigned long flags;
+ unsigned long vcpu_flags;
u16 virq_to_evtchn[NR_VIRQS];
struct domain
{
- domid_t id;
+ domid_t domain_id;
shared_info_t *shared_info; /* shared data area */
spinlock_t time_lock;
unsigned int xenheap_pages; /* # pages allocated from Xen heap */
/* Scheduling. */
- int shutdown_code; /* code value from OS (if DF_SHUTDOWN). */
+ int shutdown_code; /* code value from OS (if DOMF_shutdown) */
void *sched_priv; /* scheduler-specific data */
struct domain *next_in_list;
u16 pirq_to_evtchn[NR_PIRQS];
u32 pirq_mask[NR_PIRQS/32];
- unsigned long flags;
+ unsigned long domain_flags;
unsigned long vm_assist;
atomic_t refcnt;
extern struct exec_domain *idle_task[NR_CPUS];
#define IDLE_DOMAIN_ID (0x7FFFU)
-#define is_idle_task(_p) (test_bit(DF_IDLETASK, &(_p)->flags))
+#define is_idle_task(_d) (test_bit(_DOMF_idle_domain, &(_d)->domain_flags))
struct exec_domain *alloc_exec_domain_struct(struct domain *d,
unsigned long vcpu);
(_ed) != NULL; \
(_ed) = (_ed)->next_in_list )
-#define EDF_DONEFPUINIT 0 /* Has the FPU been initialised for this task? */
-#define EDF_USEDFPU 1 /* Has this task used the FPU since last save? */
-#define EDF_GUEST_STTS 2 /* Has the guest OS requested 'stts'? */
-#define EDF_BLOCKED 3 /* Domain is blocked waiting for an event. */
-#define EDF_CTRLPAUSE 4 /* Domain is paused by controller software. */
-#define EDF_RUNNING 5 /* Currently running on a CPU. */
-#define EDF_CPUPINNED 6 /* Disables auto-migration. */
-#define EDF_MIGRATED 7 /* Domain migrated between CPUs. */
-#define EDF_DONEINIT 8 /* Initialization completed . */
-
-#define DF_CONSTRUCTED 0 /* Has the guest OS been fully built yet? */
-#define DF_IDLETASK 1 /* Is this one of the per-CPU idle domains? */
-#define DF_PRIVILEGED 2 /* Is this domain privileged? */
-#define DF_PHYSDEV 3 /* May this domain do IO to physical devices? */
-#define DF_SHUTDOWN 4 /* Guest shut itself down for some reason. */
-#define DF_CRASHED 5 /* Domain crashed inside Xen, cannot continue. */
-#define DF_DYING 6 /* Death rattle. */
+/*
+ * Per-VCPU flags (vcpu_flags).
+ */
+ /* Has the FPU been initialised? */
+#define _VCPUF_fpu_initialised 0
+#define VCPUF_fpu_initialised (1UL<<_VCPUF_fpu_initialised)
+ /* Has the FPU been used since it was last saved? */
+#define _VCPUF_fpu_dirtied 1
+#define VCPUF_fpu_dirtied (1UL<<_VCPUF_fpu_dirtied)
+ /* Has the guest OS requested 'stts'? */
+#define _VCPUF_guest_stts 2
+#define VCPUF_guest_stts (1UL<<_VCPUF_guest_stts)
+ /* Domain is blocked waiting for an event. */
+#define _VCPUF_blocked 3
+#define VCPUF_blocked (1UL<<_VCPUF_blocked)
+ /* Domain is paused by controller software. */
+#define _VCPUF_ctrl_pause 4
+#define VCPUF_ctrl_pause (1UL<<_VCPUF_ctrl_pause)
+ /* Currently running on a CPU? */
+#define _VCPUF_running 5
+#define VCPUF_running (1UL<<_VCPUF_running)
+ /* Disables auto-migration between CPUs. */
+#define _VCPUF_cpu_pinned 6
+#define VCPUF_cpu_pinned (1UL<<_VCPUF_cpu_pinned)
+ /* Domain migrated between CPUs. */
+#define _VCPUF_cpu_migrated 7
+#define VCPUF_cpu_migrated (1UL<<_VCPUF_cpu_migrated)
+ /* Initialization completed. */
+#define _VCPUF_initialised 8
+#define VCPUF_initialised (1UL<<_VCPUF_initialised)
+
+/*
+ * Per-domain flags (domain_flags).
+ */
+ /* Has the guest OS been fully built yet? */
+#define _DOMF_constructed 0
+#define DOMF_constructed (1UL<<_DOMF_constructed)
+ /* Is this one of the per-CPU idle domains? */
+#define _DOMF_idle_domain 1
+#define DOMF_idle_domain (1UL<<_DOMF_idle_domain)
+ /* Is this domain privileged? */
+#define _DOMF_privileged 2
+#define DOMF_privileged (1UL<<_DOMF_privileged)
+ /* May this domain do IO to physical devices? */
+#define _DOMF_physdev_access 3
+#define DOMF_physdev_access (1UL<<_DOMF_physdev_access)
+ /* Guest shut itself down for some reason. */
+#define _DOMF_shutdown 4
+#define DOMF_shutdown (1UL<<_DOMF_shutdown)
+ /* Domain has crashed and cannot continue to execute. */
+#define _DOMF_crashed 5
+#define DOMF_crashed (1UL<<_DOMF_crashed)
+ /* Death rattle. */
+#define _DOMF_dying 6
+#define DOMF_dying (1UL<<_DOMF_dying)
static inline int domain_runnable(struct exec_domain *ed)
{
return ( (atomic_read(&ed->pausecnt) == 0) &&
- !(ed->flags & ((1<<EDF_BLOCKED)|(1<<EDF_CTRLPAUSE))) &&
- !(ed->domain->flags & ((1<<DF_SHUTDOWN)|(1<<DF_CRASHED))) );
+ !(ed->vcpu_flags & (VCPUF_blocked|VCPUF_ctrl_pause)) &&
+ !(ed->domain->domain_flags & (DOMF_shutdown|DOMF_crashed)) );
}
static inline void exec_domain_pause(struct exec_domain *ed)
static inline void exec_domain_unblock(struct exec_domain *ed)
{
- if ( test_and_clear_bit(EDF_BLOCKED, &ed->flags) )
+ if ( test_and_clear_bit(_VCPUF_blocked, &ed->vcpu_flags) )
domain_wake(ed);
}
for_each_exec_domain ( d, ed )
{
ASSERT(ed != current);
- if ( !test_and_set_bit(EDF_CTRLPAUSE, &ed->flags) )
+ if ( !test_and_set_bit(_VCPUF_ctrl_pause, &ed->vcpu_flags) )
domain_sleep(ed);
}
for_each_exec_domain ( d, ed )
{
- if ( test_and_clear_bit(EDF_CTRLPAUSE, &ed->flags) )
+ if ( test_and_clear_bit(_VCPUF_ctrl_pause, &ed->vcpu_flags) )
domain_wake(ed);
}
}
-
-#define IS_PRIV(_d) (test_bit(DF_PRIVILEGED, &(_d)->flags))
-#define IS_CAPABLE_PHYSDEV(_d) (test_bit(DF_PHYSDEV, &(_d)->flags))
+#define IS_PRIV(_d) \
+ (test_bit(_DOMF_privileged, &(_d)->domain_flags))
+#define IS_CAPABLE_PHYSDEV(_d) \
+ (test_bit(_DOMF_physdev_access, &(_d)->domain_flags))
#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))